For the same cats-vs-dogs image classifier:
# Notebook shell magic: download the filtered cats-vs-dogs dataset archive
# (--no-check-certificate skips TLS verification; acceptable here for a
# public Google-hosted training dataset, not for sensitive downloads).
!wget --no-check-certificate \
https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip \
-O /tmp/cats_and_dogs_filtered.zip
import os
import zipfile

# Extract the downloaded dataset archive into /tmp.
local_zip = '/tmp/cats_and_dogs_filtered.zip'
# Context manager guarantees the archive handle is closed even if
# extraction raises (the original closed it manually).
with zipfile.ZipFile(local_zip, 'r') as zip_ref:
    zip_ref.extractall('/tmp')

# Inspect the extracted directory layout.
os.listdir('/tmp/cats_and_dogs_filtered')
os.listdir('/tmp/cats_and_dogs_filtered/train')
# Report how many training images each class has.
print(len(os.listdir('/tmp/cats_and_dogs_filtered/train/dogs')))
print(len(os.listdir('/tmp/cats_and_dogs_filtered/train/cats')))
# Convenience path variables for the train/validation class folders.
base_dir='/tmp/cats_and_dogs_filtered'
train_dir=os.path.join(base_dir,'train')
validation_dir=os.path.join(base_dir,'validation')
train_dogs_dir=os.path.join(train_dir,'dogs')
train_cats_dir=os.path.join(train_dir,'cats')
val_dogs_dir=os.path.join(validation_dir,'dogs')
val_cats_dir=os.path.join(validation_dir,'cats')
# All cat training image filenames.
train_cats_filenames=os.listdir(train_cats_dir)
# Peek at the first few filenames.
train_cats_filenames[:4]
import matplotlib.pyplot as plt
# Notebook magic: render matplotlib figures inline.
%matplotlib inline
# Display one sample cat image.
plt.imshow(plt.imread(os.path.join(train_cats_dir,train_cats_filenames[0])))
import random

# Show a 4x4 grid of randomly sampled cat images, each titled with its
# array shape, and remember which filenames were drawn.
images = []
plt.figure(figsize=(16, 16))
for cell in range(16):
    plt.subplot(4, 4, cell + 1)
    picked = random.choice(train_cats_filenames)
    images.append(picked)
    sample = plt.imread(os.path.join(train_cats_dir, picked))
    plt.imshow(sample)
    plt.title(sample.shape)

# Pixel-intensity histogram for each of the same 16 images.
plt.figure(figsize=(16, 16))
for slot, fname in enumerate(images, start=1):
    sample = plt.imread(os.path.join(train_cats_dir, fname))
    plt.subplot(4, 4, slot)
    plt.hist(sample.flat)
# All dog training image filenames; peek at the first few.
train_dogs_filenames = os.listdir(train_dogs_dir)
train_dogs_filenames[:4]

# Show a 4x4 grid of randomly sampled dog images, each titled with its
# array shape, and remember which filenames were drawn.
dogs_images = []
plt.figure(figsize=(16, 16))
for cell in range(16):
    plt.subplot(4, 4, cell + 1)
    picked = random.choice(train_dogs_filenames)
    dogs_images.append(picked)
    sample = plt.imread(os.path.join(train_dogs_dir, picked))
    plt.imshow(sample)
    plt.title(sample.shape)

# Pixel-intensity histogram for each of the same 16 images.
plt.figure(figsize=(16, 16))
for slot, fname in enumerate(dogs_images, start=1):
    sample = plt.imread(os.path.join(train_dogs_dir, fname))
    plt.subplot(4, 4, slot)
    plt.hist(sample.flat)
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Rescale pixel values from [0, 255] to [0, 1] for both splits.
train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)

# Stream 150x150 images in batches of 20 with binary (cat/dog) labels.
train_generator = train_datagen.flow_from_directory(
    train_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
validation_generator = val_datagen.flow_from_directory(
    validation_dir, target_size=(150, 150), batch_size=20, class_mode='binary')

# Pull one batch to inspect shapes. Use the builtin next() rather than
# the generator's .next() method, which was removed in newer Keras.
img, labels = next(train_generator)
print(img.shape)
print(labels.shape)

# Visualize the batch with its 0/1 labels.
plt.figure(figsize=(16, 16))
for i in range(20):
    plt.subplot(4, 5, i + 1)
    plt.imshow(img[i, :, :, :])
    plt.title(labels[i])
    plt.axis("off")
## import required methods
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D

# Baseline CNN: three conv/pool stages, one dense hidden layer,
# sigmoid output for binary cat-vs-dog classification.
model = Sequential()
## first conv layer followed by maxpooling — only the first layer
## declares input_shape; repeating it on later layers is ignored by
## Keras and misleading, so it is specified once here.
model.add(Conv2D(16, 3, activation='relu', input_shape=(150, 150, 3)))
model.add(MaxPooling2D(2))
## second conv layer followed by maxpooling
model.add(Conv2D(32, 3, activation='relu'))
model.add(MaxPooling2D(2))
## third conv layer followed by maxpooling
model.add(Conv2D(64, 3, activation='relu'))
model.add(MaxPooling2D(2))
# Convert the feature map into a 1D array
model.add(Flatten())
# Fully connected layer with 512 neurons
model.add(Dense(512, activation='relu'))
## Final output layer: single sigmoid unit -> probability of class 1
model.add(Dense(1, activation='sigmoid'))
# let us see the summary
model.summary()
### Compiling the model
import tensorflow as tf

# Explicit optimizer (the original relied on Keras' implicit default).
model.compile(optimizer='rmsprop',
              loss=tf.keras.losses.BinaryCrossentropy(),
              metrics=['accuracy'])

# The generator already yields batches of 20, so batch_size must NOT be
# passed to fit() — Keras rejects/ignores it for generator input.
history = model.fit(train_generator,
                    epochs=15,
                    validation_data=validation_generator)

train_acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
train_loss = history.history['loss']
val_loss = history.history['val_loss']
# Derive the x-axis from the recorded history instead of hard-coding 15,
# so the plot stays correct if the epoch count changes.
epochs = list(range(1, len(train_acc) + 1))

plt.figure(figsize=(16, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs, train_acc, label='train_acc')
plt.plot(epochs, val_acc, label='val_acc')
plt.title('accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs, train_loss, label='train_loss')
plt.plot(epochs, val_loss, label='val_loss')
plt.title('loss')
plt.legend()
# Deeper CNN variant: four conv/pool stages.
model1 = Sequential()
## first conv layer followed by maxpooling — input_shape declared only
## here; repeating it on later layers is ignored and misleading.
model1.add(Conv2D(128, 3, activation='relu', input_shape=(150, 150, 3)))
model1.add(MaxPooling2D(2))
## second conv layer followed by maxpooling
model1.add(Conv2D(64, 3, activation='relu'))
model1.add(MaxPooling2D(2))
## third conv layer followed by maxpooling
model1.add(Conv2D(64, 3, activation='relu'))
model1.add(MaxPooling2D(2))
## fourth conv layer followed by maxpooling
model1.add(Conv2D(32, 3, activation='relu'))
model1.add(MaxPooling2D(2))
# Convert the feature map into a 1D array
model1.add(Flatten())
# Fully connected layer with 512 neurons
model1.add(Dense(512, activation='relu'))
## Final output layer. BUG FIX: softmax over a single unit always
## outputs 1.0, making the model untrainable — a one-unit binary head
## must use sigmoid.
model1.add(Dense(1, activation='sigmoid'))
# let us see the summary (was printing model's summary, not model1's)
model1.summary()
# Explicit optimizer (the original relied on Keras' implicit default).
model1.compile(optimizer='rmsprop',
               loss=tf.keras.losses.BinaryCrossentropy(),
               metrics=['accuracy'])

# BUG FIX: the original called model.fit here, re-training the FIRST
# model instead of model1. Also drop batch_size — the generator already
# batches at 20 and fit() rejects/ignores it for generator input.
history1 = model1.fit(train_generator,
                      epochs=7,
                      validation_data=validation_generator)

train_acc = history1.history['accuracy']
val_acc = history1.history['val_accuracy']
train_loss = history1.history['loss']
val_loss = history1.history['val_loss']
# Derive the x-axis from the recorded history instead of hard-coding 7.
epochs = list(range(1, len(train_acc) + 1))

plt.figure(figsize=(16, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs, train_acc, label='train_acc')
plt.plot(epochs, val_acc, label='val_acc')
plt.title('accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs, train_loss, label='train_loss')
plt.plot(epochs, val_loss, label='val_loss')
plt.title('loss')
plt.legend()
My 2nd model has an overfitting issue (training accuracy keeps rising while validation accuracy stalls).
And 1st model at